if ( (page->flags & PG_type_mask) != PGT_gdt_page )
{
- if ( page->type_count != 0 )
+ if ( page_type_count(page) != 0 )
goto out;
/* Check all potential GDT entries in the page. */
page = frame_table + pfn;
ASSERT((page->flags & PG_type_mask) == PGT_gdt_page);
ASSERT((page->flags & PG_domain_mask) == p->domain);
- ASSERT((page->type_count != 0) && (page->tot_count != 0));
+ ASSERT((page_type_count(page) != 0) && (page_tot_count(page) != 0));
put_page_type(page);
put_page_tot(page);
}
case PGT_writeable_page:
break;
default:
- if ( page->type_count != 0 )
+ if ( page_type_count(page) != 0 )
goto out;
}
op.u.getpageframeinfo.domain = page->flags & PG_domain_mask;
op.u.getpageframeinfo.type = NONE;
- if ( page->type_count != 0 )
+ if ( page_type_count(page) != 0 )
{
switch ( page->flags & PG_type_mask )
{
/* Get a free page and add it to the domain's page list. */
pf = list_entry(temp, struct pfn_info, list);
pf->flags |= p->domain;
- pf->type_count = pf->tot_count = 0;
+ set_page_type_count(pf, 0);
+ set_page_tot_count(pf, 0);
temp = temp->next;
list_del(&pf->list);
list_add_tail(&pf->list, &p->pg_head);
}
pf = &frame_table[mpfn];
- if ( (pf->type_count != 0) ||
- (pf->tot_count != 0) ||
+ if ( (page_type_count(pf) != 0) ||
+ (page_tot_count(pf) != 0) ||
((pf->flags & PG_domain_mask) != p->domain) )
{
DPRINTK("Bad page free for domain %d (%ld, %ld, %08lx)\n",
- p->domain, pf->type_count, pf->tot_count, pf->flags);
+ p->domain, page_type_count(pf),
+ page_tot_count(pf), pf->flags);
rc = -EINVAL;
goto out;
}
{
pf = list_entry(temp, struct pfn_info, list);
pf->flags = p->domain;
- pf->type_count = pf->tot_count = 0;
+ set_page_type_count(pf, 0);
+ set_page_tot_count(pf, 0);
temp = temp->next;
list_del(&pf->list);
list_add_tail(&pf->list, &p->pg_head);
while ( (ent = p->pg_head.next) != &p->pg_head )
{
struct pfn_info *pf = list_entry(ent, struct pfn_info, list);
- pf->type_count = pf->tot_count = pf->flags = 0;
+ set_page_type_count(pf, 0);
+ set_page_tot_count(pf, 0);
+ pf->flags = 0;
ASSERT(ent->next->prev == ent);
ASSERT(ent->prev->next == ent);
list_del(ent);
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_writeable_page | PG_need_flush;
- page->type_count = page->tot_count = 1;
+ set_page_type_count(page, 1);
+ set_page_tot_count(page, 1);
/* Set up the MPT entry. */
machine_to_phys_mapping[cur_address >> PAGE_SHIFT] = count;
*l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
page = frame_table + l1_pgentry_to_pagenr(*l1tab);
page->flags = dom | PGT_l1_page_table;
- page->tot_count++;
+ get_page_tot(page);
l1tab++;
if( !((unsigned long)l1tab & (PAGE_SIZE - 1)) )
{
l2tab++;
}
}
- page->type_count |= REFCNT_PIN_BIT;
- page->tot_count |= REFCNT_PIN_BIT;
- page->flags = dom | PGT_l2_page_table;
+ get_page_type(page); /* guest_pinned */
+ get_page_tot(page); /* guest_pinned */
+ page->flags = dom | PG_guest_pinned | PGT_l2_page_table;
unmap_domain_mem(l1start);
/* Set up shared info area. */
* physical page frame by a domain, including uses as a page directory,
* a page table, or simple mappings via a PTE. This count prevents a
* domain from releasing a frame back to the hypervisor's free pool when
- * it is still referencing it!
+ * it still holds a reference to it.
*
* TYPE_COUNT is more subtle. A frame can be put to one of three
* mutually-exclusive uses: it might be used as a page directory, or a
#include <asm/domain_page.h>
#if 0
-#define MEM_LOG(_f, _a...) printk("DOM%d: (file=memory.c, line=%d) " _f "\n", current->domain, __LINE__, ## _a )
+#define MEM_LOG(_f, _a...) \
+ printk("DOM%d: (file=memory.c, line=%d) " _f "\n", \
+ current->domain, __LINE__, ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif
page = frame_table + pfn;
ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
ASSERT((page->flags & PG_domain_mask) == p->domain);
- ASSERT((page->type_count != 0) && (page->tot_count != 0));
+ ASSERT((page_type_count(page) != 0) && (page_tot_count(page) != 0));
put_page_type(page);
put_page_tot(page);
}
page = frame_table + (l1e >> PAGE_SHIFT);
if ( unlikely((page->flags & PG_type_mask) != PGT_ldt_page) )
{
- if ( unlikely(page->type_count != 0) )
+ if ( unlikely(page_type_count(page) != 0) )
goto out;
/* Check all potential LDT entries in the page. */
type);
return -1;
}
- ASSERT((page_type_count(page) & ~REFCNT_PIN_BIT) != 0);
+ ASSERT(page_type_count(page) != 0);
put_page_tot(page);
return put_page_type(page);
}
page = frame_table + page_nr;
ASSERT(DOMAIN_OKAY(page->flags));
ASSERT((!writeable) ||
- (((page_type_count(page) & ~REFCNT_PIN_BIT) != 0) &&
+ ((page_type_count(page) != 0) &&
((page->flags & PG_type_mask) == PGT_writeable_page) &&
((page->flags & PG_need_flush) == PG_need_flush)));
if ( writeable )
switch ( cmd )
{
case MMUEXT_PIN_L1_TABLE:
- if ( unlikely(page->type_count & REFCNT_PIN_BIT) )
+ if ( unlikely(page->flags & PG_guest_pinned) )
{
MEM_LOG("Pfn %08lx already pinned", pfn);
err = 1;
goto mark_as_pinned;
case MMUEXT_PIN_L2_TABLE:
- if ( unlikely(page->type_count & REFCNT_PIN_BIT) )
+ if ( unlikely(page->flags & PG_guest_pinned) )
{
MEM_LOG("Pfn %08lx already pinned", pfn);
err = 1;
MEM_LOG("Error while pinning pfn %08lx", pfn);
break;
}
- put_page_type(page);
- put_page_tot(page);
- page->type_count |= REFCNT_PIN_BIT;
- page->tot_count |= REFCNT_PIN_BIT;
+ page->flags |= PG_guest_pinned;
break;
case MMUEXT_UNPIN_TABLE:
- if ( !DOMAIN_OKAY(page->flags) )
+ if ( unlikely(!DOMAIN_OKAY(page->flags)) )
{
err = 1;
MEM_LOG("Page %08lx bad domain (dom=%ld)",
ptr, page->flags & PG_domain_mask);
}
- else if ( (page->type_count & REFCNT_PIN_BIT) )
+ else if ( likely(page->flags & PG_guest_pinned) )
{
- page->type_count &= ~REFCNT_PIN_BIT;
- page->tot_count &= ~REFCNT_PIN_BIT;
- get_page_type(page);
- get_page_tot(page);
+ page->flags &= ~PG_guest_pinned;
((page->flags & PG_type_mask) == PGT_l1_page_table) ?
put_l1_table(pfn) : put_l2_table(pfn);
}
mk_l2_pgentry(req.val));
break;
default:
- if ( page->type_count == 0 )
+ if ( page_type_count(page) == 0 )
{
*(unsigned long *)req.ptr = req.val;
err = 0;
/* If reading into the frame, the frame must be writeable. */
if ( writeable_buffer &&
((page->flags & PG_type_mask) != PGT_writeable_page) &&
- (page->type_count != 0) )
+ (page_type_count(page) != 0) )
{
DPRINTK("non-writeable page passed for block read\n");
goto out;
page = frame_table + pfn;
if ( writeable_buffer )
{
- if ( page->type_count == 0 )
+ if ( page_type_count(page) == 0 )
{
page->flags &= ~PG_type_mask;
/* No need for PG_need_flush here. */
unsigned long type_count; /* pagetable/dir, or domain-writeable refs. */
} frame_table_t;
-/*
- * We use a high bit to indicate that a page is pinned.
- * We do not use the top bit as that would mean that we'd get confused with
- * -ve error numbers in some places in common/memory.c.
- */
-#define REFCNT_PIN_BIT 0x40000000UL
-
#define get_page_tot(p) ((p)->tot_count++)
#define put_page_tot(p) \
({ ASSERT((p)->tot_count != 0); --(p)->tot_count; })
#define PG_slab 24
/* domain flags (domain != 0) */
/*
- * NB. The following three flags are MUTUALLY EXCLUSIVE!
+ * NB. The following page types are MUTUALLY EXCLUSIVE.
* At most one can be true at any point, and 'type_count' counts how many
- * references exist of teh current type. A change in type can only occur
+ * references exist of the current type. A change in type can only occur
* when type_count == 0.
*/
#define PG_type_mask (15<<24) /* bits 24-27 */
*/
#define PG_need_flush (1<<28)
+/*
+ * This bit indicates that the guest OS has pinned the page to its current
+ * type. For page tables this can avoid the frame scanning and reference-count
+ * updates that occur when the type count falls to zero.
+ */
+#define PG_guest_pinned (1<<29)
+
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define PageSetSlab(page) set_bit(PG_slab, &(page)->flags)
#define PageClearSlab(page) clear_bit(PG_slab, &(page)->flags)
#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) \
do { \
(_pfn)->flags = (_dom) | PGT_writeable_page | PG_need_flush; \
- (_pfn)->tot_count = (_pfn)->type_count = 2; \
+ set_page_tot_count((_pfn), 2); \
+ set_page_type_count((_pfn), 2); \
} while ( 0 )
-#define UNSHARE_PFN(_pfn) \
- (_pfn)->flags = (_pfn)->type_count = (_pfn)->tot_count = 0
+#define UNSHARE_PFN(_pfn) \
+ do { \
+ (_pfn)->flags = 0; \
+ set_page_tot_count((_pfn), 0); \
+ set_page_type_count((_pfn), 0); \
+ } while ( 0 )
/* The array of struct pfn_info,
* free pfn list and number of free pfns in the free list
}
/* Give the new page to the domain, marking it writeable. */
- new_page->tot_count = new_page->type_count = 1;
+ set_page_type_count(new_page, 1);
+ set_page_tot_count(new_page, 1);
new_page->flags = vif->domain->domain | PGT_writeable_page | PG_need_flush;
list_add(&new_page->list, &vif->domain->pg_head);
if ( ((buf_page->flags & (PG_type_mask | PG_domain_mask)) !=
(PGT_writeable_page | p->domain)) ||
- (buf_page->tot_count != 1) )
+ (page_tot_count(buf_page) != 1) )
{
DPRINTK("Need a mapped-once writeable page (%ld/%ld/%08lx)\n",
- buf_page->type_count, buf_page->tot_count,
+ page_type_count(buf_page), page_tot_count(buf_page),
buf_page->flags);
make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
goto rx_unmap_and_continue;
get_page_type(pte_page);
get_page_tot(pte_page);
*ptep &= ~_PAGE_PRESENT;
- buf_page->flags = buf_page->type_count = buf_page->tot_count = 0;
+ buf_page->flags = 0;
+ set_page_type_count(buf_page, 0);
+ set_page_tot_count(buf_page, 0);
list_del(&buf_page->list);
vif->rx_shadow_ring[j].id = rx.id;
*pte = (rx->buf_pfn<<PAGE_SHIFT) | (*pte & ~PAGE_MASK) |
_PAGE_RW | _PAGE_PRESENT;
page->flags |= PGT_writeable_page | PG_need_flush;
- page->type_count = page->tot_count = 1;
+ set_page_type_count(page, 1);
+ set_page_tot_count(page, 1);
}
unmap_domain_mem(pte);
spin_lock_irqsave(&free_list_lock, flags);
- pf->flags = pf->type_count = pf->tot_count = 0;
+ pf->flags = 0;
+ set_page_type_count(pf, 0);
+ set_page_tot_count(pf, 0);
list_add(&pf->list, &free_list);
free_pfns++;